[IA64] fix memory leak when domVTI is created
author: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 3 Aug 2006 17:05:59 +0000 (11:05 -0600)
committer: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 3 Aug 2006 17:05:59 +0000 (11:05 -0600)
privregs cleanup.
A memory leak occurs when a VT-i domain is created.
When a domain is created, xend sets the maximum number of vcpus before
domain setup, so alloc_vcpu_struct() thinks the domain is a normal domU,
not a domVTI. Only afterwards does xend mark the domain as domVTI, so
the memory that was allocated for the domU case is never freed.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/asm-offsets.c
xen/arch/ia64/xen/dom0_ops.c
xen/arch/ia64/xen/domain.c
xen/include/asm-ia64/domain.h

index c0c8bb2e37b74dfcbd743e3906bbb077c30fabb4..5cb8fab9ee296377cc90865ce2ecdd65eb6bbede 100644 (file)
@@ -32,6 +32,7 @@ void foo(void)
        DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
        DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
        DEFINE(SHARED_INFO_SIZE, sizeof (struct shared_info));
+       DEFINE(MAPPED_REGS_T_SIZE, sizeof (mapped_regs_t));
 
        BLANK();
        DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack));
index 4c61bb17e1883f52c9143ebc42282124d11a7187..62adf91fc341e029b7c15bfca2c46797c70db584 100644 (file)
@@ -122,6 +122,15 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
                     ret = -EINVAL;
                     break;
                 }
+                if (!d->arch.is_vti) {
+                    struct vcpu *v;
+                    for_each_vcpu(d, v) {
+                        BUG_ON(v->arch.privregs == NULL);
+                        free_domheap_pages(virt_to_page(v->arch.privregs),
+                                      get_order_from_shift(XMAPPEDREGS_SHIFT));
+                        relinquish_vcpu_resources(v);
+                    }
+                }
                 d->arch.is_vti = 1;
                 vmx_setup_platform(d);
             }
index 25c3c854b831594e81067ffac6630ffc229f76ee..a421764c2a38516ca27ff271b7c21d52271d7d6a 100644 (file)
@@ -236,6 +236,14 @@ void startup_cpu_idle_loop(void)
        continue_cpu_idle_loop();
 }
 
+/* compile time test for get_order(sizeof(mapped_regs_t)) !=
+ * get_order_from_shift(XMAPPEDREGS_SHIFT))
+ */
+#if !(((1 << (XMAPPEDREGS_SHIFT - 1)) < MAPPED_REGS_T_SIZE) && \
+      (MAPPED_REGS_T_SIZE < (1 << (XMAPPEDREGS_SHIFT + 1))))
+# error "XMAPPEDREGS_SHIFT doesn't match sizeof(mapped_regs_t)."
+#endif
+
 struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
 {
        struct vcpu *v;
@@ -261,13 +269,17 @@ struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
 
        if (!is_idle_domain(d)) {
            if (!d->arch.is_vti) {
-               /* Create privregs page only if not VTi.  */
-               v->arch.privregs = 
-                   alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+               int order;
+               int i;
+
+               /* Create privregs page only if not VTi. */
+               order = get_order_from_shift(XMAPPEDREGS_SHIFT);
+               v->arch.privregs = alloc_xenheap_pages(order);
                BUG_ON(v->arch.privregs == NULL);
-               memset(v->arch.privregs, 0, PAGE_SIZE);
-               share_xen_page_with_guest(virt_to_page(v->arch.privregs),
-                                         d, XENSHARE_writable);
+               memset(v->arch.privregs, 0, 1 << XMAPPEDREGS_SHIFT);
+               for (i = 0; i < (1 << order); i++)
+                   share_xen_page_with_guest(virt_to_page(v->arch.privregs) +
+                                             i, d, XENSHARE_writable);
            }
 
            v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
@@ -295,15 +307,21 @@ struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
        return v;
 }
 
+void relinquish_vcpu_resources(struct vcpu *v)
+{
+    if (v->arch.privregs != NULL) {
+        free_xenheap_pages(v->arch.privregs,
+                           get_order_from_shift(XMAPPEDREGS_SHIFT));
+        v->arch.privregs = NULL;
+    }
+}
+
 void free_vcpu_struct(struct vcpu *v)
 {
        if (VMX_DOMAIN(v))
                vmx_relinquish_vcpu_resources(v);
-       else {
-               if (v->arch.privregs != NULL)
-                       free_xenheap_pages(v->arch.privregs,
-                                     get_order_from_shift(XMAPPEDREGS_SHIFT));
-       }
+       else
+               relinquish_vcpu_resources(v);
 
        free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
 }
index 9546adbc62b5663efe6f0927bfed50c32063b7ac..9c565cff9f24d7272765298dd587afcfae5a0b1f 100644 (file)
@@ -37,6 +37,8 @@ p2m_entry_retry(struct p2m_entry* entry)
 }
 
 extern void domain_relinquish_resources(struct domain *);
+struct vcpu;
+extern void relinquish_vcpu_resources(struct vcpu *v);
 
 /* given a current domain metaphysical address, return the physical address */
 extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,